d->arch.cmdline = c->cmdline;
new_thread(v, regs->cr_iip, 0, 0);
-#ifdef CONFIG_IA64_SPLIT_CACHE
- /* Sync d/i cache conservatively */
- if (!running_on_sim) {
- ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
- if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
- printk("PAL CACHE FLUSH failed for dom0.\n");
- else
- printk("Sync i/d cache for guest SUCC\n");
- }
-#endif
+ sync_split_caches();
v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
if ( c->vcpu.privregs && copy_from_user(v->arch.privregs,
c->vcpu.privregs, sizeof(mapped_regs_t))) {
new_thread(v, pkern_entry, 0, 0);
physdev_init_dom0(d);
-#ifdef CONFIG_IA64_SPLIT_CACHE
- /* Sync d/i cache conservatively */
- if (!running_on_sim) {
- ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
- if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
- printk("PAL CACHE FLUSH failed for dom0.\n");
- else
- printk("Sync i/d cache for guest SUCC\n");
- }
-#endif
+ sync_split_caches();
// FIXME: Hack for keyboard input
#ifdef CLONE_DOMAIN0
#endif
new_thread(v, pkern_entry, 0, 0);
printk("new_thread returns\n");
-#ifdef CONFIG_IA64_SPLIT_CACHE
- /* Sync d/i cache conservatively */
- if (!running_on_sim) {
- ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
- if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
- printk("PAL CACHE FLUSH failed for dom0.\n");
- else
- printk("Sync i/d cache for guest SUCC\n");
- }
-#endif
+ sync_split_caches();
__set_bit(0x30, VCPU(v, delivery_mask));
return 0;
v->domain->domain_id);
loaddomainelfimage(v->domain,v->domain->arch.image_start);
new_thread(v, v->domain->arch.entry, 0, 0);
-#ifdef CONFIG_IA64_SPLIT_CACHE
- /* Sync d/i cache conservatively */
- if (!running_on_sim) {
- ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
- if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
- printk("PAL CACHE FLUSH failed for dom0.\n");
- else
- printk("Sync i/d cache for guest SUCC\n");
- }
-#endif
+ sync_split_caches();
}
#endif
goto loop;
}
}
+
+/* FIXME: for the foreseeable future, all cpu's that enable VTi have split
+ * caches and all cpu's that have split caches enable VTi. This may
+ * eventually be untrue though. */
+#define cpu_has_split_cache vmx_enabled
+extern unsigned int vmx_enabled;
+
+/*
+ * Conservatively flush the d/i caches via PAL_CACHE_FLUSH so that newly
+ * loaded/written guest code becomes coherent on CPUs with split
+ * instruction/data caches.  Consolidates the four identical open-coded
+ * CONFIG_IA64_SPLIT_CACHE blocks removed elsewhere in this change.
+ * On CPUs without a split cache this only emits a diagnostic printk.
+ */
+void sync_split_caches(void)
+{
+ unsigned long ret, progress;
+
+ if (cpu_has_split_cache) {
+ /* Sync d/i cache conservatively.  First argument 4 selects the
+ * cache type to flush — presumably "instruction + data"; confirm
+ * against the Itanium PAL_CACHE_FLUSH specification.  progress is
+ * an output parameter of the PAL call, hence left uninitialized. */
+ ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
+ /* PAL_STATUS_UNIMPLEMENTED is deliberately tolerated: a PAL that
+ * lacks this call is treated the same as a successful flush. */
+ if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
+ printk("PAL CACHE FLUSH failed\n");
+ else printk("Sync i/d cache for guest SUCC\n");
+ }
+ else printk("sync_split_caches ignored for CPU with no split cache\n");
+}